goto pin_page;
case MMUEXT_UNPIN_TABLE:
- if ( unlikely(!(okay = get_page_from_pagenr(mfn, FOREIGNDOM))) )
+ if ( unlikely(!(okay = get_page_from_pagenr(mfn, d))) )
{
MEM_LOG("Mfn %lx bad domain (dom=%p)",
mfn, page_get_owner(page));
{
l1_pgentry_t ol1e, nl1e;
int modified = 0, i;
+ struct vcpu *v;
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
*/
memcpy(&l1page[i], &snapshot[i],
(L1_PAGETABLE_ENTRIES - i) * sizeof(l1_pgentry_t));
- domain_crash();
+
+ /* Crash the offending domain. */
+ set_bit(_DOMF_ctrl_pause, &d->domain_flags);
+ for_each_vcpu ( d, v )
+ vcpu_sleep_nosync(v);
break;
}
modified = revalidate_l1(d, pl1e, d->arch.ptwr[which].page);
unmap_domain_page(pl1e);
perfc_incr_histo(wpt_updates, modified, PT_UPDATES);
- ptwr_eip_stat_update( d->arch.ptwr[which].eip, d->domain_id, modified);
- d->arch.ptwr[which].prev_nr_updates = modified;
+ ptwr_eip_stat_update(d->arch.ptwr[which].eip, d->domain_id, modified);
+ d->arch.ptwr[which].prev_nr_updates = modified;
/*
* STEP 3. Reattach the L1 p.t. page into the current address space.
void ptwr_destroy(struct domain *d)
{
+ /*
+ * Serialise the final writable-pagetable flush against any in-flight
+ * ptwr activity by holding the per-domain big lock across the cleanup
+ * (the same locking discipline as sync_pagetable_state()).
+ */
+ LOCK_BIGLOCK(d);
cleanup_writable_pagetable(d);
+ UNLOCK_BIGLOCK(d);
+ /* Safe to release the ptwr scratch pages once no flush can run. */
free_xenheap_page(d->arch.ptwr[PTWR_PT_ACTIVE].page);
free_xenheap_page(d->arch.ptwr[PTWR_PT_INACTIVE].page);
}
atomic_inc(&v->pausecnt);
vcpu_sleep_sync(v);
}
+
+ sync_pagetable_state(d);
}
void vcpu_unpause(struct vcpu *v)
for_each_vcpu ( d, v )
vcpu_sleep_sync(v);
}
+
+ sync_pagetable_state(d);
}
void domain_unpause_by_systemcontroller(struct domain *d)
int revalidate_l1(struct domain *, l1_pgentry_t *, l1_pgentry_t *);
void cleanup_writable_pagetable(struct domain *d);
-#define sync_pagetable_state(d) cleanup_writable_pagetable(d)
+/*
+ * Flush outstanding writable-pagetable state for domain @d, taking the
+ * per-domain big lock around the flush so callers need not hold it.
+ * Wrapped in do { } while ( 0 ) so the multi-statement expansion is safe
+ * in unbraced if/else bodies.
+ */
+#define sync_pagetable_state(d) \
+ do { \
+ LOCK_BIGLOCK(d); \
+ cleanup_writable_pagetable(d); \
+ UNLOCK_BIGLOCK(d); \
+ } while ( 0 )
int audit_adjust_pgtables(struct domain *d, int dir, int noisy);